# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.1069.175.5 -> 1.1069.175.6
#	include/asm-ia64/pgtable.h	1.12    -> 1.13   
#	include/linux/hugetlb.h	1.3     -> 1.4    
#	arch/ia64/mm/hugetlbpage.c	1.4     -> 1.5    
#	           mm/mmap.c	1.29.1.8 -> 1.29.1.9
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 04/01/08	kaos@sgi.com	1.1069.1.235
# [PATCH] ia64: fix deadlock in ia64_mca_cmc_int_caller()
# 
# smp_call_function() must not be called from interrupt context (can
# deadlock on tasklist_lock).  Use keventd to call smp_call_function().
# --------------------------------------------
# 04/01/08	tony.luck@intel.com	1.1069.1.236
# [PATCH] ia64: enable recovery from TLB errors
# 
# Here's the updated version of the MCA TLB recovery patch.
# --------------------------------------------
# 04/01/08	kaos@sgi.com	1.1069.1.237
# [PATCH] ia64: Avoid double clear of CMC/CPE records
# 
# Credit to Ben Woodard <ben@zork.net>.
# --------------------------------------------
# 04/01/08	steiner@sgi.com	1.1069.1.238
# [PATCH] ia64: fix ia64_ctx.lock deadlock
# 
# I hit a deadlock involving the ia64_ctx.lock. The lock
# may be taken in interrupt context to process an IPI from smp_flush_tlb_mm.
# --------------------------------------------
# 04/01/08	arun.sharma@intel.com	1.1069.1.239
# [PATCH] ia64: ia32 sigaltstack() fix
# 
# The attached patch fixes a bug introduced by the earlier patch to
# handle the differences between ia32 and ia64 in the definition of
# MINSIGSTKSZ.
# --------------------------------------------
# 04/01/08	kenneth.w.chen@intel.com	1.1069.175.6
# ia64: hugetlb_free_pgtables() bug fix
# 
# 	When there are two huge page mappings, like the two in the example
# 	below, the first ending at the end of one PGDIR_SIZE-aligned chunk
# 	and the second starting in the next one (64GB with a 16K page size):
# 
# 	8000000ff0000000-8000001000000000 rw-s
# 	8000001000000000-8000001010000000 rw-s
# 
# 	Unmapping the first vma tricks free_pgtables() into thinking it
# 	can remove the set of pgds indexed at 0x400, so it goes ahead and
# 	purges the entire pmd/pte set that is still in use by the second
# 	mapping.  Any subsequent access to a pmd/pte of the second, still
# 	active mapping then triggers the bug.  We have seen a hard kernel
# 	hang on some platforms and an MCA on others, plus all kinds of
# 	other unpleasant results.
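# 
# 	For scale (illustrative numbers only: PAGE_SHIFT = 14 for 16K pages,
# 	and assuming, say, 256MB huge pages, i.e. HPAGE_SHIFT = 28; the huge
# 	page size is a configuration choice), the new constant added in
# 	pgtable.h works out to
# 
# 	HUGETLB_PGDIR_SHIFT = HPAGE_SHIFT + 2*(PAGE_SHIFT-3)
# 	                    = 28 + 2*(14-3) = 50,
# 
# 	i.e. one hugetlb pgd entry spans 2^50 bytes of huge-page virtual
# 	space, versus the 2^36 = 64GB per pgd entry of the normal PGDIR_SIZE
# 	quoted above.  The region offsets of both mappings above are far
# 	below 2^50, so although they sit in different 64GB chunks they share
# 	one HUGETLB_PGDIR_SIZE chunk and, after the htlbpage_to_page()
# 	translation used below, the single pgd entry the log refers to as
# 	0x400.  That is why the freeing walk has to use the HUGETLB_PGDIR_*
# 	constants rather than the generic PGDIR_* ones.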
# --------------------------------------------
#
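# Note on configurations without CONFIG_HUGETLB_PAGE (a sketch of the
# preprocessed result, based on the stubs in include/linux/hugetlb.h below):
# is_hugepage_addr() is defined to 0 and the new hugetlb_free_pgtables()
# stub to an empty do { } while (0), so the added branch in do_munmap()
# reduces to
#
#	if (0)
#		do { } while (0);
#	else
#		free_pgtables(mm, prev, addr, addr+len);
#
# which the compiler folds back into the original free_pgtables() call, so
# there is no cost when huge page support is not configured.
#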
diff -Nru a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
--- a/arch/ia64/mm/hugetlbpage.c	Thu Jan  8 16:34:21 2004
+++ b/arch/ia64/mm/hugetlbpage.c	Thu Jan  8 16:34:21 2004
@@ -123,6 +123,58 @@
 	return 0;
 }
 
+/*
+ * Same as generic free_pgtables(), except constant PGDIR_* and pgd_offset
+ * are hugetlb region specific.
+ */
+void hugetlb_free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
+	unsigned long start, unsigned long end)
+{
+	unsigned long first = start & HUGETLB_PGDIR_MASK;
+	unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
+	unsigned long start_index, end_index;
+
+	if (!prev) {
+		prev = mm->mmap;
+		if (!prev)
+			goto no_mmaps;
+		if (prev->vm_end > start) {
+			if (last > prev->vm_start)
+				last = prev->vm_start;
+			goto no_mmaps;
+		}
+	}
+	for (;;) {
+		struct vm_area_struct *next = prev->vm_next;
+
+		if (next) {
+			if (next->vm_start < start) {
+				prev = next;
+				continue;
+			}
+			if (last > next->vm_start)
+				last = next->vm_start;
+		}
+		if (prev->vm_end > first)
+			first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
+		break;
+	}
+no_mmaps:
+	if (last < first)
+		return;
+	/*
+	 * If the PGD bits are not consecutive in the virtual address, the
+	 * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
+	 */
+	start_index = pgd_index(htlbpage_to_page(first));
+	end_index = pgd_index(htlbpage_to_page(last));
+	if (end_index > start_index) {
+		clear_page_tables(mm, start_index, end_index - start_index);
+		flush_tlb_pgtables(mm, first & HUGETLB_PGDIR_MASK,
+				   last & HUGETLB_PGDIR_MASK);
+	}
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma)
 {
diff -Nru a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
--- a/include/asm-ia64/pgtable.h	Thu Jan  8 16:34:21 2004
+++ b/include/asm-ia64/pgtable.h	Thu Jan  8 16:34:21 2004
@@ -431,6 +431,12 @@
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA
 
+#ifdef CONFIG_HUGETLB_PAGE
+#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
+#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
+#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
+#endif
+
 /*
  * No page table caches to initialise
  */
diff -Nru a/include/linux/hugetlb.h b/include/linux/hugetlb.h
--- a/include/linux/hugetlb.h	Thu Jan  8 16:34:21 2004
+++ b/include/linux/hugetlb.h	Thu Jan  8 16:34:21 2004
@@ -25,6 +25,8 @@
 int hugetlb_report_meminfo(char *);
 int is_hugepage_mem_enough(size_t);
 int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
+void hugetlb_free_pgtables(struct mm_struct * mm, struct vm_area_struct * prev,
+	unsigned long start, unsigned long end);
 
 extern int htlbpage_max;
 
@@ -44,6 +46,7 @@
 #define is_hugepage_mem_enough(size)		0
 #define is_hugepage_addr(addr)			0
 #define is_aligned_hugepage_range(addr, len)    0
+#define hugetlb_free_pgtables(mm, prev, start, end) do { } while (0)
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 
diff -Nru a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c	Thu Jan  8 16:34:21 2004
+++ b/mm/mmap.c	Thu Jan  8 16:34:21 2004
@@ -1044,7 +1044,10 @@
 	if (extra)
 		kmem_cache_free(vm_area_cachep, extra);
 
-	free_pgtables(mm, prev, addr, addr+len);
+	if (is_hugepage_addr(addr))
+		hugetlb_free_pgtables(mm, prev, addr, addr+len);
+	else
+		free_pgtables(mm, prev, addr, addr+len);
 
 	return 0;
 }